In [ ]:
from google.colab import drive
drive.mount('/content/drive')
Drive already mounted at /content/drive; to attempt to forcibly remount, call drive.mount("/content/drive", force_remount=True).

Steps and tasks:

  1. Import the libraries, load dataset, print shape of data, visualize the images in dataset. (5 Marks)

  2. Data Pre-processing: (15 Marks) a. Normalization. b. Gaussian Blurring. c. Visualize data after pre-processing.

  3. Make data compatible: (10 Marks) a. Convert labels to one-hot-vectors. b. Print the label for y_train[0]. c. Split the dataset into training, testing, and validation set. (Hint: First split images and labels into training and testing set with test_size = 0.3. Then further split test data into test and validation set with test_size = 0.5) d. Check the shape of data, Reshape data into shapes compatible with Keras models if it’s not already. If it’s already in the compatible shape, then comment in the notebook that it’s already in compatible shape.

  4. Building CNN: (15 Marks) a. Define layers. b. Set optimizer and loss function. (Use Adam optimizer and categorical crossentropy.)

  5. Fit and evaluate model and print confusion matrix. (10 Marks)

  6. Visualize predictions for x_test[2], x_test[3], x_test[33], x_test[36],x_test[59]. (5 Marks)

In [ ]:
%tensorflow_version 2.x
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout, Activation, Flatten, BatchNormalization, Input
from tensorflow.keras.layers import Conv2D, MaxPooling2D
import sklearn.model_selection as sk
import matplotlib.pyplot as plt
%matplotlib inline
import cv2
from google.colab.patches import cv2_imshow

import numpy as np
import pandas as pd
import seaborn as sns
tf.__version__
Out[ ]:
'2.3.0'
In [ ]:
# Initialize the random number generator
import random
random.seed(0)

# Ignore the warnings
import warnings
warnings.filterwarnings("ignore")
In [ ]:
path_data ='/content/drive/My Drive/Colab Notebooks/images.npy'
x_data = np.load(path_data)

path_label ='/content/drive/My Drive/Colab Notebooks/Labels.csv'
y_data = pd.read_csv(path_label)
In [ ]:
print(f'x shape:{x_data.shape}')
print(f'y shape:{y_data.shape}')
y_data.value_counts()
# data distribution is skewed with bottom 3 at 1/3rd the top
x shape:(4750, 128, 128, 3)
y shape:(4750, 1)
Out[ ]:
Label                    
Loose Silky-bent             654
Common Chickweed             611
Scentless Mayweed            516
Small-flowered Cranesbill    496
Fat Hen                      475
Charlock                     390
Sugar beet                   385
Cleavers                     287
Black-grass                  263
Shepherds Purse              231
Maize                        221
Common wheat                 221
dtype: int64
In [ ]:
y_data = pd.get_dummies(y_data['Label'])
print(y_data.sum().T)
Black-grass                  263
Charlock                     390
Cleavers                     287
Common Chickweed             611
Common wheat                 221
Fat Hen                      475
Loose Silky-bent             654
Maize                        221
Scentless Mayweed            516
Shepherds Purse              231
Small-flowered Cranesbill    496
Sugar beet                   385
dtype: int64
In [ ]:
class_names =y_data.columns.to_list()
In [ ]:
y_data =y_data.to_numpy()
In [ ]:
## we will look at random images just to see what we are dealign with
def show_random_examples(x, x2, y, p , num):
    # x is the original image , x2 are modified image , num is the pulitple of 4 images to show y is teh actual , p is the prediction
    indices = np.random.choice(range(x.shape[0]), num*4 , replace=False)
    factor =2 
    # if modified images present then show them next to the original image otherwise just original images
    if x is x2:
      factor =1 
    x = x[indices]
    x2 = x2[indices]
    y = y[indices]
    p = p[indices]
    plt.figure(figsize=(num*8, num*6))
    for i in range(num*4):
        plt.subplot(num *factor, 4, factor*i + 1)
        plt.imshow(x[i])
        plt.xticks([])
        plt.yticks([])
        col = 'green' if np.argmax(y[i]) == np.argmax(p[i]) else 'red'
        plt.xlabel(class_names[np.argmax(p[i])], color=col)

        if factor ==2:
          plt.subplot(num *2, 4, 2*i + 2)
          plt.imshow(x2[i])
          plt.xticks([])
          plt.yticks([])
          col = 'green' if np.argmax(y[i]) == np.argmax(p[i]) else 'red'
          plt.xlabel(class_names[np.argmax(p[i])], color=col)



    plt.show()

show_random_examples(x_data, x_data,  y_data , y_data, 3)
In [ ]:
 
In [ ]:
###now to split into train and test data
x_train, x_test, y_train, y_test = sk.train_test_split(x_data,y_data,test_size=0.3, random_state = 42)
In [ ]:
print( f' Train x:{x_train.shape} Train y:{y_train.shape}  ')
print( f' Test x:{x_test.shape} Test y:{y_test.shape}  ')
#If it’s already in the compatible shape, then comment in the notebook that it’s already in compatible shape.
 Train x:(3325, 128, 128, 3) Train y:(3325, 12)  
 Test x:(1425, 128, 128, 3) Test y:(1425, 12)  
In [ ]:
 
In [ ]:
### function to Gausiaan blur
def do_gaussian_blur(x):
  new_set = []
  for i in x:
    new_set.append(cv2.GaussianBlur(i,(5,5),cv2.BORDER_DEFAULT))
  return np.array(new_set)
In [ ]:
x_test2 = do_gaussian_blur(x_test)
x_train2 = do_gaussian_blur(x_train)

print("********* After Gaussian Blur *******")
show_random_examples(x_train, x_train2,  y_train , y_train, 3)
********* After Gaussian Blur *******
In [ ]:
### All teh seeds are Green - so maybe removing teh background is a good trick
### function to apply a green mask
### its easier to apply a greenish mask in HSV rather than in RGB


# I tried with both green_mask and without the green mask; the
# the results were better with the green mask always 

def do_green_mask(x):
  new_set = []
  for i in x:
    hsv = cv2.cvtColor(i, cv2.COLOR_BGR2HSV)
    # mask = cv2.inRange(hsv, (36, 0, 0), (70, 255,255))
    mask = cv2.inRange(hsv, (30, 0, 0), (75, 255,255))

    ## slice the green
    imask = mask>0  # filter
    green = np.zeros_like(i, np.uint8) # zeros
    green[imask] = i[imask]  # get back RGB  only for green cells
    new_set.append(green)
  return np.array(new_set)




x_test3 =do_green_mask(x_test2)
x_train3 = do_green_mask(x_train2)
show_random_examples(x_train, x_train3,  y_train , y_train, 3)
In [ ]:
#### Scale by 255
x_train5 = x_train3.astype('float32') # Conversion to float type from integer type.
x_test5 = x_test3.astype('float32')
x_train5 /= 255.0 # Division by 255
x_test5 /= 255.0
In [ ]:
print(f' x_train: {x_train[0][0][0]}')
print(f' x_train2: {x_train2[0][0][0]}')
print(f' x_train3: {x_train3[0][0][0]}')
print(f' x_train5: {x_train5[0][0][0]}')
 x_train: [122 117 121]
 x_train2: [163 155 155]
 x_train3: [0 0 0]
 x_train5: [0. 0. 0.]
In [ ]:
#Adding Early stopping callback to the fit function is going to stop the training,
#if the val_loss is not going to change even '0.001' for more than 10 continous epochs
from tensorflow.keras.callbacks import ModelCheckpoint, EarlyStopping

early_stopping = EarlyStopping(monitor='val_loss', min_delta=0.001, patience=10)

#Adding Model Checkpoint callback to the fit function is going to save the weights whenever val_loss achieves a new low value. 
#Hence saving the best weights occurred during training

model_checkpoint =  ModelCheckpoint('seed_cnn_checkpoint_{epoch:02d}_loss{val_loss:.4f}.h5',
                                                           monitor='val_loss',
                                                           verbose=1,
                                                           save_best_only=True,
                                                           save_weights_only=True,
                                                           mode='auto',
                                                           period=1)
WARNING:tensorflow:`period` argument is deprecated. Please use `save_freq` to specify the frequency in number of batches seen.
In [ ]:
 
In [ ]:
input_shape = (128,128,3)
print (input_shape)
(128, 128, 3)
In [ ]:
def conv_model(input_shape,classes):
  model = Sequential()
  model.add(Input(input_shape))

  model.add(Conv2D(128,(3,3),strides=(1,1)))
  model.add(Activation("relu"))
  model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
  model.add(Dropout(0.25))

  model.add(Conv2D(64, (3,3), padding="same")) 
  model.add(Activation("relu"))
  model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
  model.add(Dropout(0.25))

  
  model.add(Conv2D(32, (3,3), padding="same"))
  model.add(BatchNormalization())
  model.add(Activation("relu"))
  model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
  
  # Output Layer
  model.add(Flatten())
  model.add(Dense(128 , activation = 'relu'))
  model.add(Dense(64 , activation = 'relu'))
  model.add(Dense(classes,activation='softmax'))
  return model


model = conv_model(input_shape=input_shape,classes=12)
In [ ]:
# initiate Adam optimizer
base_learning_rate = 0.00001
opt = keras.optimizers.Adam(learning_rate=base_learning_rate)
In [ ]:
# Let's train the model
model.compile(loss='categorical_crossentropy',
              optimizer=opt,
              metrics=['accuracy'])
In [ ]:
model.summary()
Model: "sequential"
_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
conv2d (Conv2D)              (None, 126, 126, 128)     3584      
_________________________________________________________________
activation (Activation)      (None, 126, 126, 128)     0         
_________________________________________________________________
max_pooling2d (MaxPooling2D) (None, 63, 63, 128)       0         
_________________________________________________________________
dropout (Dropout)            (None, 63, 63, 128)       0         
_________________________________________________________________
conv2d_1 (Conv2D)            (None, 63, 63, 64)        73792     
_________________________________________________________________
activation_1 (Activation)    (None, 63, 63, 64)        0         
_________________________________________________________________
max_pooling2d_1 (MaxPooling2 (None, 31, 31, 64)        0         
_________________________________________________________________
dropout_1 (Dropout)          (None, 31, 31, 64)        0         
_________________________________________________________________
conv2d_2 (Conv2D)            (None, 31, 31, 32)        18464     
_________________________________________________________________
batch_normalization (BatchNo (None, 31, 31, 32)        128       
_________________________________________________________________
activation_2 (Activation)    (None, 31, 31, 32)        0         
_________________________________________________________________
max_pooling2d_2 (MaxPooling2 (None, 15, 15, 32)        0         
_________________________________________________________________
flatten (Flatten)            (None, 7200)              0         
_________________________________________________________________
dense (Dense)                (None, 128)               921728    
_________________________________________________________________
dense_1 (Dense)              (None, 64)                8256      
_________________________________________________________________
dense_2 (Dense)              (None, 12)                780       
=================================================================
Total params: 1,026,732
Trainable params: 1,026,668
Non-trainable params: 64
_________________________________________________________________
In [ ]:
 
In [ ]:
epochs = 125
history = model.fit(x_train5, y_train,
          batch_size=12 ,
          epochs=epochs,
          validation_split=0.4,
          shuffle=True,
          verbose=1,
          callbacks=[early_stopping,model_checkpoint])
Epoch 1/125
167/167 [==============================] - ETA: 0s - loss: 2.4384 - accuracy: 0.1504
Epoch 00001: val_loss improved from inf to 2.46818, saving model to seed_cnn_checkpoint_01_loss2.4682.h5
167/167 [==============================] - 4s 21ms/step - loss: 2.4384 - accuracy: 0.1504 - val_loss: 2.4682 - val_accuracy: 0.2361
Epoch 2/125
167/167 [==============================] - ETA: 0s - loss: 2.1897 - accuracy: 0.2852
Epoch 00002: val_loss improved from 2.46818 to 2.40424, saving model to seed_cnn_checkpoint_02_loss2.4042.h5
167/167 [==============================] - 3s 19ms/step - loss: 2.1897 - accuracy: 0.2852 - val_loss: 2.4042 - val_accuracy: 0.2135
Epoch 3/125
166/167 [============================>.] - ETA: 0s - loss: 2.0021 - accuracy: 0.3494
Epoch 00003: val_loss improved from 2.40424 to 2.25572, saving model to seed_cnn_checkpoint_03_loss2.2557.h5
167/167 [==============================] - 3s 19ms/step - loss: 2.0018 - accuracy: 0.3494 - val_loss: 2.2557 - val_accuracy: 0.2286
Epoch 4/125
164/167 [============================>.] - ETA: 0s - loss: 1.8620 - accuracy: 0.3836
Epoch 00004: val_loss improved from 2.25572 to 2.09436, saving model to seed_cnn_checkpoint_04_loss2.0944.h5
167/167 [==============================] - 3s 19ms/step - loss: 1.8645 - accuracy: 0.3825 - val_loss: 2.0944 - val_accuracy: 0.2707
Epoch 5/125
166/167 [============================>.] - ETA: 0s - loss: 1.7506 - accuracy: 0.4182
Epoch 00005: val_loss improved from 2.09436 to 2.03150, saving model to seed_cnn_checkpoint_05_loss2.0315.h5
167/167 [==============================] - 3s 19ms/step - loss: 1.7508 - accuracy: 0.4180 - val_loss: 2.0315 - val_accuracy: 0.2744
Epoch 6/125
166/167 [============================>.] - ETA: 0s - loss: 1.6871 - accuracy: 0.4302
Epoch 00006: val_loss improved from 2.03150 to 1.95167, saving model to seed_cnn_checkpoint_06_loss1.9517.h5
167/167 [==============================] - 3s 19ms/step - loss: 1.6879 - accuracy: 0.4301 - val_loss: 1.9517 - val_accuracy: 0.2992
Epoch 7/125
167/167 [==============================] - ETA: 0s - loss: 1.5985 - accuracy: 0.4657
Epoch 00007: val_loss improved from 1.95167 to 1.93679, saving model to seed_cnn_checkpoint_07_loss1.9368.h5
167/167 [==============================] - 3s 19ms/step - loss: 1.5985 - accuracy: 0.4657 - val_loss: 1.9368 - val_accuracy: 0.3158
Epoch 8/125
167/167 [==============================] - ETA: 0s - loss: 1.5457 - accuracy: 0.5018
Epoch 00008: val_loss improved from 1.93679 to 1.84501, saving model to seed_cnn_checkpoint_08_loss1.8450.h5
167/167 [==============================] - 3s 19ms/step - loss: 1.5457 - accuracy: 0.5018 - val_loss: 1.8450 - val_accuracy: 0.3579
Epoch 9/125
164/167 [============================>.] - ETA: 0s - loss: 1.5155 - accuracy: 0.5000
Epoch 00009: val_loss improved from 1.84501 to 1.83706, saving model to seed_cnn_checkpoint_09_loss1.8371.h5
167/167 [==============================] - 3s 19ms/step - loss: 1.5122 - accuracy: 0.5003 - val_loss: 1.8371 - val_accuracy: 0.3549
Epoch 10/125
164/167 [============================>.] - ETA: 0s - loss: 1.4290 - accuracy: 0.5305
Epoch 00010: val_loss improved from 1.83706 to 1.79133, saving model to seed_cnn_checkpoint_10_loss1.7913.h5
167/167 [==============================] - 3s 19ms/step - loss: 1.4304 - accuracy: 0.5323 - val_loss: 1.7913 - val_accuracy: 0.3789
Epoch 11/125
165/167 [============================>.] - ETA: 0s - loss: 1.3816 - accuracy: 0.5540
Epoch 00011: val_loss improved from 1.79133 to 1.76038, saving model to seed_cnn_checkpoint_11_loss1.7604.h5
167/167 [==============================] - 3s 19ms/step - loss: 1.3826 - accuracy: 0.5529 - val_loss: 1.7604 - val_accuracy: 0.3992
Epoch 12/125
167/167 [==============================] - ETA: 0s - loss: 1.3256 - accuracy: 0.5664
Epoch 00012: val_loss improved from 1.76038 to 1.70614, saving model to seed_cnn_checkpoint_12_loss1.7061.h5
167/167 [==============================] - 3s 19ms/step - loss: 1.3256 - accuracy: 0.5664 - val_loss: 1.7061 - val_accuracy: 0.4308
Epoch 13/125
167/167 [==============================] - ETA: 0s - loss: 1.2786 - accuracy: 0.5830
Epoch 00013: val_loss improved from 1.70614 to 1.70229, saving model to seed_cnn_checkpoint_13_loss1.7023.h5
167/167 [==============================] - 3s 19ms/step - loss: 1.2786 - accuracy: 0.5830 - val_loss: 1.7023 - val_accuracy: 0.4150
Epoch 14/125
166/167 [============================>.] - ETA: 0s - loss: 1.2278 - accuracy: 0.6185
Epoch 00014: val_loss did not improve from 1.70229
167/167 [==============================] - 3s 19ms/step - loss: 1.2284 - accuracy: 0.6180 - val_loss: 1.7060 - val_accuracy: 0.4075
Epoch 15/125
167/167 [==============================] - ETA: 0s - loss: 1.1918 - accuracy: 0.6195
Epoch 00015: val_loss improved from 1.70229 to 1.61271, saving model to seed_cnn_checkpoint_15_loss1.6127.h5
167/167 [==============================] - 3s 19ms/step - loss: 1.1918 - accuracy: 0.6195 - val_loss: 1.6127 - val_accuracy: 0.4459
Epoch 16/125
167/167 [==============================] - ETA: 0s - loss: 1.1251 - accuracy: 0.6496
Epoch 00016: val_loss improved from 1.61271 to 1.56133, saving model to seed_cnn_checkpoint_16_loss1.5613.h5
167/167 [==============================] - 3s 19ms/step - loss: 1.1251 - accuracy: 0.6496 - val_loss: 1.5613 - val_accuracy: 0.4767
Epoch 17/125
165/167 [============================>.] - ETA: 0s - loss: 1.0858 - accuracy: 0.6581
Epoch 00017: val_loss did not improve from 1.56133
167/167 [==============================] - 3s 19ms/step - loss: 1.0833 - accuracy: 0.6596 - val_loss: 1.5699 - val_accuracy: 0.4571
Epoch 18/125
166/167 [============================>.] - ETA: 0s - loss: 1.0390 - accuracy: 0.6777
Epoch 00018: val_loss improved from 1.56133 to 1.50609, saving model to seed_cnn_checkpoint_18_loss1.5061.h5
167/167 [==============================] - 3s 19ms/step - loss: 1.0383 - accuracy: 0.6777 - val_loss: 1.5061 - val_accuracy: 0.4910
Epoch 19/125
164/167 [============================>.] - ETA: 0s - loss: 1.0044 - accuracy: 0.6905
Epoch 00019: val_loss improved from 1.50609 to 1.50021, saving model to seed_cnn_checkpoint_19_loss1.5002.h5
167/167 [==============================] - 3s 19ms/step - loss: 1.0049 - accuracy: 0.6902 - val_loss: 1.5002 - val_accuracy: 0.4865
Epoch 20/125
164/167 [============================>.] - ETA: 0s - loss: 0.9636 - accuracy: 0.7068
Epoch 00020: val_loss improved from 1.50021 to 1.45247, saving model to seed_cnn_checkpoint_20_loss1.4525.h5
167/167 [==============================] - 3s 19ms/step - loss: 0.9626 - accuracy: 0.7068 - val_loss: 1.4525 - val_accuracy: 0.5105
Epoch 21/125
165/167 [============================>.] - ETA: 0s - loss: 0.9119 - accuracy: 0.7187
Epoch 00021: val_loss did not improve from 1.45247
167/167 [==============================] - 3s 19ms/step - loss: 0.9121 - accuracy: 0.7188 - val_loss: 1.4595 - val_accuracy: 0.4955
Epoch 22/125
165/167 [============================>.] - ETA: 0s - loss: 0.8728 - accuracy: 0.7288
Epoch 00022: val_loss improved from 1.45247 to 1.41713, saving model to seed_cnn_checkpoint_22_loss1.4171.h5
167/167 [==============================] - 3s 19ms/step - loss: 0.8754 - accuracy: 0.7268 - val_loss: 1.4171 - val_accuracy: 0.5233
Epoch 23/125
165/167 [============================>.] - ETA: 0s - loss: 0.8516 - accuracy: 0.7384
Epoch 00023: val_loss improved from 1.41713 to 1.41630, saving model to seed_cnn_checkpoint_23_loss1.4163.h5
167/167 [==============================] - 3s 19ms/step - loss: 0.8510 - accuracy: 0.7388 - val_loss: 1.4163 - val_accuracy: 0.5195
Epoch 24/125
164/167 [============================>.] - ETA: 0s - loss: 0.8201 - accuracy: 0.7495
Epoch 00024: val_loss did not improve from 1.41630
167/167 [==============================] - 3s 19ms/step - loss: 0.8185 - accuracy: 0.7499 - val_loss: 1.4372 - val_accuracy: 0.4962
Epoch 25/125
165/167 [============================>.] - ETA: 0s - loss: 0.7751 - accuracy: 0.7641
Epoch 00025: val_loss improved from 1.41630 to 1.39874, saving model to seed_cnn_checkpoint_25_loss1.3987.h5
167/167 [==============================] - 3s 19ms/step - loss: 0.7795 - accuracy: 0.7614 - val_loss: 1.3987 - val_accuracy: 0.5180
Epoch 26/125
165/167 [============================>.] - ETA: 0s - loss: 0.7397 - accuracy: 0.7843
Epoch 00026: val_loss improved from 1.39874 to 1.33436, saving model to seed_cnn_checkpoint_26_loss1.3344.h5
167/167 [==============================] - 3s 19ms/step - loss: 0.7407 - accuracy: 0.7840 - val_loss: 1.3344 - val_accuracy: 0.5564
Epoch 27/125
166/167 [============================>.] - ETA: 0s - loss: 0.7349 - accuracy: 0.7826
Epoch 00027: val_loss did not improve from 1.33436
167/167 [==============================] - 3s 20ms/step - loss: 0.7352 - accuracy: 0.7825 - val_loss: 1.3714 - val_accuracy: 0.5391
Epoch 28/125
165/167 [============================>.] - ETA: 0s - loss: 0.7049 - accuracy: 0.8010
Epoch 00028: val_loss did not improve from 1.33436
167/167 [==============================] - 3s 20ms/step - loss: 0.7046 - accuracy: 0.8020 - val_loss: 1.3734 - val_accuracy: 0.5331
Epoch 29/125
164/167 [============================>.] - ETA: 0s - loss: 0.6830 - accuracy: 0.7967
Epoch 00029: val_loss did not improve from 1.33436
167/167 [==============================] - 3s 20ms/step - loss: 0.6812 - accuracy: 0.7975 - val_loss: 1.3399 - val_accuracy: 0.5549
Epoch 30/125
166/167 [============================>.] - ETA: 0s - loss: 0.6586 - accuracy: 0.8057
Epoch 00030: val_loss improved from 1.33436 to 1.28685, saving model to seed_cnn_checkpoint_30_loss1.2868.h5
167/167 [==============================] - 3s 20ms/step - loss: 0.6585 - accuracy: 0.8055 - val_loss: 1.2868 - val_accuracy: 0.5774
Epoch 31/125
166/167 [============================>.] - ETA: 0s - loss: 0.6430 - accuracy: 0.8208
Epoch 00031: val_loss improved from 1.28685 to 1.28179, saving model to seed_cnn_checkpoint_31_loss1.2818.h5
167/167 [==============================] - 3s 19ms/step - loss: 0.6425 - accuracy: 0.8211 - val_loss: 1.2818 - val_accuracy: 0.5752
Epoch 32/125
164/167 [============================>.] - ETA: 0s - loss: 0.6125 - accuracy: 0.8283
Epoch 00032: val_loss did not improve from 1.28179
167/167 [==============================] - 3s 19ms/step - loss: 0.6135 - accuracy: 0.8281 - val_loss: 1.2829 - val_accuracy: 0.5812
Epoch 33/125
165/167 [============================>.] - ETA: 0s - loss: 0.5779 - accuracy: 0.8414
Epoch 00033: val_loss improved from 1.28179 to 1.27673, saving model to seed_cnn_checkpoint_33_loss1.2767.h5
167/167 [==============================] - 3s 19ms/step - loss: 0.5791 - accuracy: 0.8411 - val_loss: 1.2767 - val_accuracy: 0.5805
Epoch 34/125
164/167 [============================>.] - ETA: 0s - loss: 0.5779 - accuracy: 0.8349
Epoch 00034: val_loss did not improve from 1.27673
167/167 [==============================] - 3s 19ms/step - loss: 0.5783 - accuracy: 0.8361 - val_loss: 1.3393 - val_accuracy: 0.5594
Epoch 35/125
167/167 [==============================] - ETA: 0s - loss: 0.5551 - accuracy: 0.8461
Epoch 00035: val_loss improved from 1.27673 to 1.24488, saving model to seed_cnn_checkpoint_35_loss1.2449.h5
167/167 [==============================] - 3s 19ms/step - loss: 0.5551 - accuracy: 0.8461 - val_loss: 1.2449 - val_accuracy: 0.6000
Epoch 36/125
165/167 [============================>.] - ETA: 0s - loss: 0.5290 - accuracy: 0.8687
Epoch 00036: val_loss improved from 1.24488 to 1.23235, saving model to seed_cnn_checkpoint_36_loss1.2324.h5
167/167 [==============================] - 3s 19ms/step - loss: 0.5295 - accuracy: 0.8682 - val_loss: 1.2324 - val_accuracy: 0.6045
Epoch 37/125
164/167 [============================>.] - ETA: 0s - loss: 0.5218 - accuracy: 0.8623
Epoch 00037: val_loss did not improve from 1.23235
167/167 [==============================] - 3s 19ms/step - loss: 0.5215 - accuracy: 0.8622 - val_loss: 1.2749 - val_accuracy: 0.5812
Epoch 38/125
167/167 [==============================] - ETA: 0s - loss: 0.5142 - accuracy: 0.8591
Epoch 00038: val_loss did not improve from 1.23235
167/167 [==============================] - 3s 19ms/step - loss: 0.5142 - accuracy: 0.8591 - val_loss: 1.2404 - val_accuracy: 0.5977
Epoch 39/125
167/167 [==============================] - ETA: 0s - loss: 0.4867 - accuracy: 0.8672
Epoch 00039: val_loss did not improve from 1.23235
167/167 [==============================] - 3s 19ms/step - loss: 0.4867 - accuracy: 0.8672 - val_loss: 1.2611 - val_accuracy: 0.5902
Epoch 40/125
165/167 [============================>.] - ETA: 0s - loss: 0.4793 - accuracy: 0.8727
Epoch 00040: val_loss did not improve from 1.23235
167/167 [==============================] - 3s 19ms/step - loss: 0.4793 - accuracy: 0.8722 - val_loss: 1.2700 - val_accuracy: 0.5872
Epoch 41/125
166/167 [============================>.] - ETA: 0s - loss: 0.4573 - accuracy: 0.8800
Epoch 00041: val_loss did not improve from 1.23235
167/167 [==============================] - 3s 19ms/step - loss: 0.4568 - accuracy: 0.8802 - val_loss: 1.2382 - val_accuracy: 0.6015
Epoch 42/125
167/167 [==============================] - ETA: 0s - loss: 0.4421 - accuracy: 0.8812
Epoch 00042: val_loss did not improve from 1.23235
167/167 [==============================] - 3s 19ms/step - loss: 0.4421 - accuracy: 0.8812 - val_loss: 1.2566 - val_accuracy: 0.5977
Epoch 43/125
164/167 [============================>.] - ETA: 0s - loss: 0.4340 - accuracy: 0.8857
Epoch 00043: val_loss did not improve from 1.23235
167/167 [==============================] - 3s 19ms/step - loss: 0.4350 - accuracy: 0.8847 - val_loss: 1.2751 - val_accuracy: 0.5872
Epoch 44/125
165/167 [============================>.] - ETA: 0s - loss: 0.4221 - accuracy: 0.8869
Epoch 00044: val_loss improved from 1.23235 to 1.22247, saving model to seed_cnn_checkpoint_44_loss1.2225.h5
167/167 [==============================] - 3s 20ms/step - loss: 0.4231 - accuracy: 0.8862 - val_loss: 1.2225 - val_accuracy: 0.6090
Epoch 45/125
164/167 [============================>.] - ETA: 0s - loss: 0.4047 - accuracy: 0.9009
Epoch 00045: val_loss improved from 1.22247 to 1.21652, saving model to seed_cnn_checkpoint_45_loss1.2165.h5
167/167 [==============================] - 3s 20ms/step - loss: 0.4051 - accuracy: 0.9003 - val_loss: 1.2165 - val_accuracy: 0.6105
Epoch 46/125
165/167 [============================>.] - ETA: 0s - loss: 0.3863 - accuracy: 0.9040
Epoch 00046: val_loss improved from 1.21652 to 1.21039, saving model to seed_cnn_checkpoint_46_loss1.2104.h5
167/167 [==============================] - 3s 19ms/step - loss: 0.3891 - accuracy: 0.9028 - val_loss: 1.2104 - val_accuracy: 0.6180
Epoch 47/125
166/167 [============================>.] - ETA: 0s - loss: 0.3785 - accuracy: 0.9041
Epoch 00047: val_loss did not improve from 1.21039
167/167 [==============================] - 3s 19ms/step - loss: 0.3780 - accuracy: 0.9043 - val_loss: 1.2515 - val_accuracy: 0.6008
Epoch 48/125
164/167 [============================>.] - ETA: 0s - loss: 0.3726 - accuracy: 0.9090
Epoch 00048: val_loss did not improve from 1.21039
167/167 [==============================] - 3s 19ms/step - loss: 0.3745 - accuracy: 0.9078 - val_loss: 1.2596 - val_accuracy: 0.5977
Epoch 49/125
166/167 [============================>.] - ETA: 0s - loss: 0.3731 - accuracy: 0.9167
Epoch 00049: val_loss did not improve from 1.21039
167/167 [==============================] - 3s 19ms/step - loss: 0.3733 - accuracy: 0.9168 - val_loss: 1.2473 - val_accuracy: 0.6060
Epoch 50/125
167/167 [==============================] - ETA: 0s - loss: 0.3443 - accuracy: 0.9188
Epoch 00050: val_loss did not improve from 1.21039
167/167 [==============================] - 3s 19ms/step - loss: 0.3443 - accuracy: 0.9188 - val_loss: 1.2551 - val_accuracy: 0.5962
Epoch 51/125
167/167 [==============================] - ETA: 0s - loss: 0.3347 - accuracy: 0.9263
Epoch 00051: val_loss did not improve from 1.21039
167/167 [==============================] - 3s 19ms/step - loss: 0.3347 - accuracy: 0.9263 - val_loss: 1.2509 - val_accuracy: 0.6023
Epoch 52/125
166/167 [============================>.] - ETA: 0s - loss: 0.3323 - accuracy: 0.9217
Epoch 00052: val_loss improved from 1.21039 to 1.20245, saving model to seed_cnn_checkpoint_52_loss1.2025.h5
167/167 [==============================] - 3s 19ms/step - loss: 0.3334 - accuracy: 0.9208 - val_loss: 1.2025 - val_accuracy: 0.6188
Epoch 53/125
166/167 [============================>.] - ETA: 0s - loss: 0.3250 - accuracy: 0.9262
Epoch 00053: val_loss did not improve from 1.20245
167/167 [==============================] - 3s 19ms/step - loss: 0.3252 - accuracy: 0.9263 - val_loss: 1.2544 - val_accuracy: 0.6008
Epoch 54/125
165/167 [============================>.] - ETA: 0s - loss: 0.3061 - accuracy: 0.9283
Epoch 00054: val_loss did not improve from 1.20245
167/167 [==============================] - 3s 19ms/step - loss: 0.3074 - accuracy: 0.9278 - val_loss: 1.2902 - val_accuracy: 0.5925
Epoch 55/125
166/167 [============================>.] - ETA: 0s - loss: 0.2920 - accuracy: 0.9307
Epoch 00055: val_loss did not improve from 1.20245
167/167 [==============================] - 3s 19ms/step - loss: 0.2916 - accuracy: 0.9308 - val_loss: 1.2104 - val_accuracy: 0.6158
Epoch 56/125
166/167 [============================>.] - ETA: 0s - loss: 0.2864 - accuracy: 0.9372
Epoch 00056: val_loss did not improve from 1.20245
167/167 [==============================] - 3s 19ms/step - loss: 0.2869 - accuracy: 0.9373 - val_loss: 1.2225 - val_accuracy: 0.6098
Epoch 57/125
164/167 [============================>.] - ETA: 0s - loss: 0.2744 - accuracy: 0.9405
Epoch 00057: val_loss did not improve from 1.20245
167/167 [==============================] - 3s 19ms/step - loss: 0.2728 - accuracy: 0.9414 - val_loss: 1.2416 - val_accuracy: 0.6000
Epoch 58/125
166/167 [============================>.] - ETA: 0s - loss: 0.2741 - accuracy: 0.9408
Epoch 00058: val_loss did not improve from 1.20245
167/167 [==============================] - 3s 19ms/step - loss: 0.2743 - accuracy: 0.9409 - val_loss: 1.2061 - val_accuracy: 0.6188
Epoch 59/125
164/167 [============================>.] - ETA: 0s - loss: 0.2579 - accuracy: 0.9487
Epoch 00059: val_loss did not improve from 1.20245
167/167 [==============================] - 3s 19ms/step - loss: 0.2615 - accuracy: 0.9474 - val_loss: 1.2114 - val_accuracy: 0.6173
Epoch 60/125
166/167 [============================>.] - ETA: 0s - loss: 0.2528 - accuracy: 0.9428
Epoch 00060: val_loss did not improve from 1.20245
167/167 [==============================] - 3s 19ms/step - loss: 0.2533 - accuracy: 0.9424 - val_loss: 1.2195 - val_accuracy: 0.6090
Epoch 61/125
165/167 [============================>.] - ETA: 0s - loss: 0.2414 - accuracy: 0.9495
Epoch 00061: val_loss did not improve from 1.20245
167/167 [==============================] - 3s 19ms/step - loss: 0.2416 - accuracy: 0.9494 - val_loss: 1.2472 - val_accuracy: 0.6030
Epoch 62/125
166/167 [============================>.] - ETA: 0s - loss: 0.2354 - accuracy: 0.9558
Epoch 00062: val_loss improved from 1.20245 to 1.18890, saving model to seed_cnn_checkpoint_62_loss1.1889.h5
167/167 [==============================] - 3s 19ms/step - loss: 0.2352 - accuracy: 0.9559 - val_loss: 1.1889 - val_accuracy: 0.6323
Epoch 63/125
165/167 [============================>.] - ETA: 0s - loss: 0.2346 - accuracy: 0.9556
Epoch 00063: val_loss did not improve from 1.18890
167/167 [==============================] - 3s 19ms/step - loss: 0.2360 - accuracy: 0.9549 - val_loss: 1.2247 - val_accuracy: 0.6165
Epoch 64/125
166/167 [============================>.] - ETA: 0s - loss: 0.2268 - accuracy: 0.9523
Epoch 00064: val_loss did not improve from 1.18890
167/167 [==============================] - 3s 19ms/step - loss: 0.2268 - accuracy: 0.9524 - val_loss: 1.1951 - val_accuracy: 0.6301
Epoch 65/125
164/167 [============================>.] - ETA: 0s - loss: 0.2186 - accuracy: 0.9548
Epoch 00065: val_loss did not improve from 1.18890
167/167 [==============================] - 3s 19ms/step - loss: 0.2174 - accuracy: 0.9554 - val_loss: 1.2245 - val_accuracy: 0.6195
Epoch 66/125
166/167 [============================>.] - ETA: 0s - loss: 0.2119 - accuracy: 0.9613
Epoch 00066: val_loss improved from 1.18890 to 1.18201, saving model to seed_cnn_checkpoint_66_loss1.1820.h5
167/167 [==============================] - 3s 19ms/step - loss: 0.2131 - accuracy: 0.9609 - val_loss: 1.1820 - val_accuracy: 0.6308
Epoch 67/125
164/167 [============================>.] - ETA: 0s - loss: 0.2022 - accuracy: 0.9639
Epoch 00067: val_loss improved from 1.18201 to 1.17952, saving model to seed_cnn_checkpoint_67_loss1.1795.h5
167/167 [==============================] - 3s 19ms/step - loss: 0.2012 - accuracy: 0.9644 - val_loss: 1.1795 - val_accuracy: 0.6346
Epoch 68/125
164/167 [============================>.] - ETA: 0s - loss: 0.2017 - accuracy: 0.9599
Epoch 00068: val_loss did not improve from 1.17952
167/167 [==============================] - 3s 19ms/step - loss: 0.2017 - accuracy: 0.9599 - val_loss: 1.2298 - val_accuracy: 0.6135
Epoch 69/125
167/167 [==============================] - ETA: 0s - loss: 0.1888 - accuracy: 0.9654
Epoch 00069: val_loss did not improve from 1.17952
167/167 [==============================] - 3s 19ms/step - loss: 0.1888 - accuracy: 0.9654 - val_loss: 1.2207 - val_accuracy: 0.6180
Epoch 70/125
167/167 [==============================] - ETA: 0s - loss: 0.1857 - accuracy: 0.9719
Epoch 00070: val_loss did not improve from 1.17952
167/167 [==============================] - 3s 19ms/step - loss: 0.1857 - accuracy: 0.9719 - val_loss: 1.1857 - val_accuracy: 0.6361
Epoch 71/125
167/167 [==============================] - ETA: 0s - loss: 0.1750 - accuracy: 0.9669
Epoch 00071: val_loss did not improve from 1.17952
167/167 [==============================] - 3s 19ms/step - loss: 0.1750 - accuracy: 0.9669 - val_loss: 1.2290 - val_accuracy: 0.6241
Epoch 72/125
166/167 [============================>.] - ETA: 0s - loss: 0.1684 - accuracy: 0.9724
Epoch 00072: val_loss did not improve from 1.17952
167/167 [==============================] - 3s 19ms/step - loss: 0.1686 - accuracy: 0.9724 - val_loss: 1.2194 - val_accuracy: 0.6286
Epoch 73/125
165/167 [============================>.] - ETA: 0s - loss: 0.1696 - accuracy: 0.9737
Epoch 00073: val_loss did not improve from 1.17952
167/167 [==============================] - 3s 19ms/step - loss: 0.1703 - accuracy: 0.9734 - val_loss: 1.2227 - val_accuracy: 0.6203
Epoch 74/125
165/167 [============================>.] - ETA: 0s - loss: 0.1514 - accuracy: 0.9737
Epoch 00074: val_loss did not improve from 1.17952
167/167 [==============================] - 3s 19ms/step - loss: 0.1513 - accuracy: 0.9739 - val_loss: 1.1804 - val_accuracy: 0.6459
Epoch 75/125
165/167 [============================>.] - ETA: 0s - loss: 0.1550 - accuracy: 0.9783
Epoch 00075: val_loss did not improve from 1.17952
167/167 [==============================] - 3s 19ms/step - loss: 0.1562 - accuracy: 0.9784 - val_loss: 1.2074 - val_accuracy: 0.6331
Epoch 76/125
166/167 [============================>.] - ETA: 0s - loss: 0.1481 - accuracy: 0.9779
Epoch 00076: val_loss did not improve from 1.17952
167/167 [==============================] - 3s 19ms/step - loss: 0.1483 - accuracy: 0.9779 - val_loss: 1.2315 - val_accuracy: 0.6248
Epoch 77/125
164/167 [============================>.] - ETA: 0s - loss: 0.1403 - accuracy: 0.9817
Epoch 00077: val_loss did not improve from 1.17952
167/167 [==============================] - 3s 19ms/step - loss: 0.1415 - accuracy: 0.9810 - val_loss: 1.2452 - val_accuracy: 0.6195
In [ ]:
def show_final_history(history):
    fig, ax = plt.subplots(1,2,figsize=(15,5))
    ax[0].set_title("Loss")
    ax[1].set_title("Accuracy")
    ax[0].plot(history.history["loss"],label="Loss")
    ax[0].plot(history.history["val_loss"],label="Test Loss")
    ax[1].plot(history.history["accuracy"],label="Accuracy")
    ax[1].plot(history.history["val_accuracy"],label="Test Accuracy")
    ax[0].legend(loc="upper right")
    ax[1].legend(loc="lower right")

show_final_history(history)
In [ ]:
##### confusion matrix 
from sklearn.metrics import classification_report,confusion_matrix
y_pred = model.predict(x_test5)
valid_preds = np.argmax(y_pred, axis = 1) 
y_check = np.argmax(y_test, axis = 1) 
cnf_matrix = confusion_matrix(y_check, valid_preds)
In [ ]:
print ( list(enumerate(class_names)))
fig, ax = plt.subplots(1)
ax = sns.heatmap(cnf_matrix, ax=ax, cmap=plt.cm.Blues, annot=True)
plt.title('Confusion Matrix')
plt.ylabel('True class')
plt.xlabel('Predicted class')#
plt.show()

## so you can see
## Charloc/CLeavers -  often gets misclassified
## Black grass and Loose Silky-bent gets misclassified
[(0, 'Black-grass'), (1, 'Charlock'), (2, 'Cleavers'), (3, 'Common Chickweed'), (4, 'Common wheat'), (5, 'Fat Hen'), (6, 'Loose Silky-bent'), (7, 'Maize'), (8, 'Scentless Mayweed'), (9, 'Shepherds Purse'), (10, 'Small-flowered Cranesbill'), (11, 'Sugar beet')]
In [ ]:
 
In [ ]:
# Score trained model.
scores = model.evaluate(x_test5, y_test, verbose=1)
print('Test loss:', scores[0])
print('Test accuracy:', scores[1])
45/45 [==============================] - 0s 11ms/step - loss: 1.1175 - accuracy: 0.6407
Test loss: 1.1175469160079956
Test accuracy: 0.6407017707824707
In [ ]:
#Visualize predictions for x_test[2], x_test[3], x_test[33], x_test[36],x_test[59]. (5 Marks)
index = [2,3,33,36,59]
j = 0
plt.figure(figsize=(25,25))
for i in index:
  plt.subplot(len(index) , 2, 2*j + 1)
  plt.imshow(x_test[i])
  plt.xticks([])
  plt.yticks([])
  plt.title("x_test[" + str(i) +"] Original")
  col = 'green' if np.argmax(y_pred[i]) == np.argmax(y_test[i]) else 'red'
  plt.xlabel(class_names[np.argmax(y_test[i])], color=col)

  plt.subplot(len(index) , 2, 2*j + 2)
  plt.imshow(x_test5[i])
  plt.xticks([])
  plt.yticks([])
  plt.title("x_test[" + str(i) +"] Input")
  col = 'green' if np.argmax(y_pred[i]) == np.argmax(y_test[i]) else 'red'
  plt.xlabel(class_names[np.argmax(y_pred[i])], color=col)

  j = j +1
plt.show
Out[ ]:
<function matplotlib.pyplot.show>